void arch_domain_destroy(struct domain *d)
{
shadow_teardown(d);
+ /* shared_info is part of the RMA so no need to release it */
}
static void machine_fail(const char *s)
void domain_relinquish_resources(struct domain *d)
{
+ relinquish_memory(d, &d->xenpage_list);
relinquish_memory(d, &d->page_list);
free_extents(d);
return;
unsigned long max_page;
unsigned long total_pages;
+void __init init_frametable(void)
+{
+ unsigned long p;
+ unsigned long nr_pages;
+ int i;
+
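+ /* one struct page_info per machine frame, rounded up to whole pages */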
+ nr_pages = PFN_UP(max_page * sizeof(struct page_info));
+
+ p = alloc_boot_pages(nr_pages, 1);
+ if (p == 0)
+ panic("Not enough memory for frame table\n");
+
+ frame_table = (struct page_info *)(p << PAGE_SHIFT);
+ for (i = 0; i < nr_pages; i += 1)
+ clear_page((void *)((p + i) << PAGE_SHIFT));
+}
+
+void share_xen_page_with_guest(
+ struct page_info *page, struct domain *d, int readonly)
+{
+ if ( page_get_owner(page) == d )
+ return;
+
+ /* This causes us to leak pages in the domain and results in
+ * zombie domains; I think we are missing a piece. Until we find
+ * it we disable the following code. */
+ set_gpfn_from_mfn(page_to_mfn(page), INVALID_M2P_ENTRY);
+
+ spin_lock(&d->page_alloc_lock);
+
+ /* The incremented type count pins as writable or read-only. */
+ page->u.inuse.type_info = (readonly ? PGT_none : PGT_writable_page);
+ page->u.inuse.type_info |= PGT_validated | 1;
+
+ page_set_owner(page, d);
+ wmb(); /* install valid domain ptr before updating refcnt. */
+ ASSERT(page->count_info == 0);
+ page->count_info |= PGC_allocated | 1;
+
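+ /* The first shared xenheap page takes a domain reference so the
+ * domain cannot be finally destroyed while Xen pages remain shared. */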
+ if ( unlikely(d->xenheap_pages++ == 0) )
+ get_knownalive_domain(d);
+ list_add_tail(&page->list, &d->xenpage_list);
+
+ spin_unlock(&d->page_alloc_lock);
+}
+
+void share_xen_page_with_privileged_guests(
+ struct page_info *page, int readonly)
+{
+ unimplemented();
+}
+
+static int create_grant_va_mapping(
+ unsigned long va, unsigned long frame, struct vcpu *v)
+{
+ if (v->domain->domain_id != 0) {
+ printk("only Dom0 can map a grant entry\n");
+ BUG();
+ return GNTST_permission_denied;
+ }
+ return GNTST_okay;
+}
+
+static int destroy_grant_va_mapping(
+ unsigned long addr, unsigned long frame, struct domain *d)
+{
+ if (d->domain_id != 0) {
+ printk("only Dom0 can map a grant entry\n");
+ BUG();
+ return GNTST_permission_denied;
+ }
+ return GNTST_okay;
+}
+
int create_grant_host_mapping(
unsigned long addr, unsigned long frame, unsigned int flags)
{
- panic("%s called\n", __func__);
- return 1;
+ if (flags & GNTMAP_application_map) {
+ printk("%s: GNTMAP_application_map not supported\n", __func__);
+ BUG();
+ return GNTST_general_error;
+ }
+ if (flags & GNTMAP_contains_pte) {
+ printk("%s: GNTMAP_contains_pte not supported\n", __func__);
+ BUG();
+ return GNTST_general_error;
+ }
+ return create_grant_va_mapping(addr, frame, current);
}
int destroy_grant_host_mapping(
unsigned long addr, unsigned long frame, unsigned int flags)
{
- panic("%s called\n", __func__);
- return 1;
+ if (flags & GNTMAP_contains_pte) {
+ printk("%s: GNTMAP_contains_pte not supported\n", __func__);
+ BUG();
+ return GNTST_general_error;
+ }
+
+ /* may have to force the remove here */
+ return destroy_grant_va_mapping(addr, frame, current->domain);
}
int steal_page(struct domain *d, struct page_info *page, unsigned int memflags)
{
return 0;
}
- if ( unlikely(!(x & PGT_validated)) )
+ else if ( unlikely(!(x & PGT_validated)) )
{
/* Someone else is updating validation of this page. Wait... */
while ( (y = page->u.inuse.type_info) == x )
return 1;
}
-void __init init_frametable(void)
-{
- unsigned long p;
- unsigned long nr_pages;
- int i;
-
- nr_pages = PFN_UP(max_page * sizeof(struct page_info));
-
- p = alloc_boot_pages(nr_pages, 1);
- if (p == 0)
- panic("Not enough memory for frame table\n");
-
- frame_table = (struct page_info *)(p << PAGE_SHIFT);
- for (i = 0; i < nr_pages; i += 1)
- clear_page((void *)((p + i) << PAGE_SHIFT));
-}
-
long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
{
printk("%s: no PPC specific memory ops\n", __func__);
struct page_extents *pe;
ulong mfn = INVALID_MFN;
int t = PFN_TYPE_NONE;
+ ulong foreign_map_pfn = 1UL << cpu_foreign_map_order();
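+ /* Guest physical layout: the RMA and extents sit below max_page,
+ * grant-table frames occupy [max_page, max_page + NR_GRANT_FRAMES),
+ * and foreign (granted) frames are flagged by the pfn bit at
+ * 1UL << cpu_foreign_map_order(). */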
/* quick tests first */
- if (d->is_privileged && cpu_io_mfn(pfn)) {
+ if (pfn & foreign_map_pfn) {
+ t = PFN_TYPE_FOREIGN;
+ mfn = pfn & ~(foreign_map_pfn);
+ } else if (pfn >= max_page && pfn < (max_page + NR_GRANT_FRAMES)) {
+ /* It's a grant-table access */
+ t = PFN_TYPE_GNTTAB;
+ mfn = gnttab_shared_mfn(d, d->grant_table, (pfn - max_page));
+ } else if (test_bit(_DOMF_privileged, &d->domain_flags) &&
+ cpu_io_mfn(pfn)) {
t = PFN_TYPE_IO;
mfn = pfn;
} else {
return mfn;
}
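+/* Invert pfn2mfn(): walk the regions a machine frame can belong to
+ * (grant table, IO, RMA, memory extents) and return the guest pfn
+ * that maps it, or INVALID_M2P_ENTRY if none does. */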
+unsigned long mfn_to_gmfn(struct domain *d, unsigned long mfn)
+{
+ struct page_extents *pe;
+ ulong cur_pfn;
+ ulong gnttab_mfn;
+ ulong rma_mfn;
+
+ /* grant? */
+ gnttab_mfn = gnttab_shared_mfn(d, d->grant_table, 0);
+ if (mfn >= gnttab_mfn && mfn < (gnttab_mfn + NR_GRANT_FRAMES))
+ return max_page + (mfn - gnttab_mfn);
+
+ /* IO? */
+ if (test_bit(_DOMF_privileged, &d->domain_flags) &&
+ cpu_io_mfn(mfn))
+ return mfn;
+
+ rma_mfn = page_to_mfn(d->arch.rma_page);
+ if (mfn >= rma_mfn &&
+ mfn < (rma_mfn + (1 << d->arch.rma_order)))
+ return mfn - rma_mfn;
+
+ /* Extent? */
+ cur_pfn = 1UL << d->arch.rma_order;
+ list_for_each_entry (pe, &d->arch.extent_list, pe_list) {
+ uint pe_pages = 1UL << pe->order;
+ uint b_mfn = page_to_mfn(pe->pg);
+ uint e_mfn = b_mfn + pe_pages;
+
+ if (mfn >= b_mfn && mfn < e_mfn) {
+ return cur_pfn + (mfn - b_mfn);
+ }
+ cur_pfn += pe_pages;
+ }
+ return INVALID_M2P_ENTRY;
+}
+
void guest_physmap_add_page(
struct domain *d, unsigned long gpfn, unsigned long mfn)
{
if (!rtas_entry)
ofd_prop_add(m, n, "power-control", NULL, 0);
+ /* tell dom0 where granted pages go in the linear map */
+ val[0] = cpu_foreign_map_order();
+ val[1] = max_page;
+ ofd_prop_add(m, n, "foreign-map", val, sizeof (val));
+
n = ofd_node_add(m, n, console, sizeof (console));
if (n > 0) {
val[0] = 0;
}
-static void h_enter(struct cpu_user_regs *regs)
+long pte_enter(ulong flags, ulong ptex, ulong vsid, ulong rpn)
{
- ulong flags = regs->gprs[4];
- ulong ptex = regs->gprs[5];
-
union pte pte;
union pte volatile *ppte;
struct domain_htab *htab;
htab = &d->arch.htab;
if (ptex > (1UL << htab->log_num_ptes)) {
DBG("%s: bad ptex: 0x%lx\n", __func__, ptex);
- regs->gprs[3] = H_Parameter;
- return;
+ return H_Parameter;
}
/* use local HPTE to avoid manual shifting & masking */
- pte.words.vsid = regs->gprs[6];
- pte.words.rpn = regs->gprs[7];
+ pte.words.vsid = vsid;
+ pte.words.rpn = rpn;
if ( pte.bits.l ) { /* large page? */
/* figure out the page size for the selected large page */
if ( lp_size >= d->arch.large_page_sizes ) {
DBG("%s: attempt to use unsupported lp_size %d\n",
__func__, lp_size);
- regs->gprs[3] = H_Parameter;
- return;
+ return H_Parameter;
}
/* get correct pgshift value */
mfn = pfn2mfn(d, pfn, &mtype);
if (mfn == INVALID_MFN) {
DBG("%s: Bad PFN: 0x%lx\n", __func__, pfn);
- regs->gprs[3] = H_Parameter;
- return;
+ return H_Parameter;
}
- if (mtype == PFN_TYPE_IO) {
+ if (mtype == PFN_TYPE_IO &&!test_bit(_DOMF_privileged, &d->domain_flags)) {
/* only a privileged dom can access outside IO space */
- if ( !d->is_privileged ) {
- DBG("%s: unprivileged access to physical page: 0x%lx\n",
- __func__, pfn);
- regs->gprs[3] = H_Privilege;
- return;
- }
-
+ DBG("%s: unprivileged access to physical page: 0x%lx\n",
+ __func__, pfn);
+ return H_Privilege;
+ }
+ if (mtype == PFN_TYPE_IO) {
if ( !((pte.bits.w == 0)
&& (pte.bits.i == 1)
&& (pte.bits.g == 1)) ) {
"w=%x i=%d m=%d, g=%d\n word 0x%lx\n", __func__,
pte.bits.w, pte.bits.i, pte.bits.m, pte.bits.g,
pte.words.rpn);
- regs->gprs[3] = H_Parameter;
- return;
+ return H_Parameter;
}
}
+ if (mtype == PFN_TYPE_GNTTAB) {
+ DBG("%s: Dom[%d] mapping grant table: 0x%lx\n",
+ __func__, d->domain_id, pfn << PAGE_SHIFT);
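+ /* grant-table frames are ordinary memory: clear the cache-inhibited bit */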
+ pte.bits.i = 0;
+ }
/* fixup the RPN field of our local PTE copy */
pte.bits.rpn = mfn | lp_bits;
if (unlikely(!get_domain(f))) {
DBG("%s: Rescinded, no domain: 0x%lx\n", __func__, pfn);
- regs->gprs[3] = H_Rescinded;
- return;
+ return H_Rescinded;
}
if (unlikely(!get_page(pg, f))) {
put_domain(f);
DBG("%s: Rescinded, no page: 0x%lx\n", __func__, pfn);
- regs->gprs[3] = H_Rescinded;
- return;
+ return H_Rescinded;
}
}
: "b" (ppte), "r" (pte.words.rpn), "r" (pte.words.vsid)
: "memory");
- regs->gprs[3] = H_Success;
- regs->gprs[4] = idx;
-
- return;
+ return idx;
}
}
if (f != NULL)
put_domain(f);
- regs->gprs[3] = H_PTEG_Full;
+ return H_PTEG_Full;
+}
+
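+/* H_ENTER hcall wrapper: unpack the arguments from the guest
+ * registers, return status in r3 and the chosen PTE index in r4. */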
+static void h_enter(struct cpu_user_regs *regs)
+{
+ ulong flags = regs->gprs[4];
+ ulong ptex = regs->gprs[5];
+ ulong vsid = regs->gprs[6];
+ ulong rpn = regs->gprs[7];
+ long ret;
+
+ ret = pte_enter(flags, ptex, vsid, rpn);
+
+ if (ret >= 0) {
+ regs->gprs[3] = H_Success;
+ regs->gprs[4] = ret;
+ } else
+ regs->gprs[3] = ret;
}
static void h_protect(struct cpu_user_regs *regs)
/* the AVPN param occupies the bit-space of the word */
if ( (flags & H_AVPN) && lpte.bits.avpn != avpn >> 7 ) {
- DBG("%s: %p: AVPN check failed: 0x%lx, 0x%lx\n", __func__,
+ DBG_LOW("%s: %p: AVPN check failed: 0x%lx, 0x%lx\n", __func__,
ppte, lpte.words.vsid, lpte.words.rpn);
regs->gprs[3] = H_Not_Found;
return;
}
}
-static void h_remove(struct cpu_user_regs *regs)
+long pte_remove(ulong flags, ulong ptex, ulong avpn, ulong *hi, ulong *lo)
{
- ulong flags = regs->gprs[4];
- ulong ptex = regs->gprs[5];
- ulong avpn = regs->gprs[6];
struct vcpu *v = get_current();
struct domain *d = v->domain;
struct domain_htab *htab = &d->arch.htab;
if ( ptex > (1UL << htab->log_num_ptes) ) {
DBG("%s: bad ptex: 0x%lx\n", __func__, ptex);
- regs->gprs[3] = H_Parameter;
- return;
+ return H_Parameter;
}
pte = &htab->map[ptex];
lpte.words.vsid = pte->words.vsid;
lpte.words.rpn = pte->words.rpn;
if ((flags & H_AVPN) && lpte.bits.avpn != (avpn >> 7)) {
- DBG("%s: avpn doesn not match\n", __func__);
- regs->gprs[3] = H_Not_Found;
- return;
+ DBG_LOW("%s: AVPN does not match\n", __func__);
+ return H_Not_Found;
}
if ((flags & H_ANDCOND) && ((avpn & pte->words.vsid) != 0)) {
DBG("%s: andcond does not match\n", __func__);
- regs->gprs[3] = H_Not_Found;
- return;
+ return H_Not_Found;
}
- regs->gprs[3] = H_Success;
/* return old PTE in regs 4 and 5 */
- regs->gprs[4] = lpte.words.vsid;
- regs->gprs[5] = lpte.words.rpn;
+ *hi = lpte.words.vsid;
+ *lo = lpte.words.rpn;
#ifdef DEBUG_LOW
/* XXX - I'm very skeptical of doing ANYTHING if not bits.v */
if (!cpu_io_mfn(mfn)) {
struct page_info *pg = mfn_to_page(mfn);
struct domain *f = page_get_owner(pg);
-
+
if (f != d) {
put_domain(f);
put_page(pg);
: "memory");
pte_tlbie(&lpte, ptex);
+
+ return H_Success;
+}
+
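+/* H_REMOVE hcall wrapper: returns status in r3 and, on success, the
+ * old PTE halves in r4/r5. */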
+static void h_remove(struct cpu_user_regs *regs)
+{
+ ulong flags = regs->gprs[4];
+ ulong ptex = regs->gprs[5];
+ ulong avpn = regs->gprs[6];
+ ulong hi, lo;
+ long ret;
+
+ ret = pte_remove(flags, ptex, avpn, &hi, &lo);
+
+ regs->gprs[3] = ret;
+
+ if (ret == H_Success) {
+ regs->gprs[4] = hi;
+ regs->gprs[5] = lo;
+ }
+ return;
}
static void h_read(struct cpu_user_regs *regs)
* Caller must own caller's BIGLOCK, is responsible for flushing the TLB, and
* must hold a reference to the page.
*/
+extern long pte_enter(ulong flags, ulong ptex, ulong vsid, ulong rpn);
+extern long pte_remove(ulong flags, ulong ptex, ulong avpn,
+ ulong *hi, ulong *lo);
+
int create_grant_host_mapping(
unsigned long addr, unsigned long frame, unsigned int flags);
int destroy_grant_host_mapping(
(d), XENSHARE_writable); \
} while ( 0 )
-#define gnttab_shared_mfn(d, t, i) \
- ((virt_to_maddr((t)->shared) >> PAGE_SHIFT) + (i))
+#define gnttab_shared_mfn(d, t, i) (((ulong)((t)->shared) >> PAGE_SHIFT) + (i))
#define gnttab_shared_gmfn(d, t, i) \
(mfn_to_gmfn(d, gnttab_shared_mfn(d, t, i)))
clear_bit(lnr, laddr);
}
+static inline uint cpu_foreign_map_order(void)
+{
+ /* 16 GiB */
+ return 34 - PAGE_SHIFT;
+}
+
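+/* Byte-address counterpart of the foreign-map pfn bit tested in
+ * pfn2mfn(): tags a grant-table bus address as a foreign mapping. */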
+#define GNTTAB_DEV_BUS(f) \
+ ((f) | (1UL << (cpu_foreign_map_order() + PAGE_SHIFT)))
+
#endif /* __ASM_PPC_GRANT_TABLE_H__ */
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
- * Copyright (C) IBM Corp. 2005
+ * Copyright (C) IBM Corp. 2005, 2006
*
* Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ * Jimi Xenidis <jimix@watson.ibm.com>
*/
#ifndef _ASM_MM_H_
};
/* The following page types are MUTUALLY EXCLUSIVE. */
-#define PGT_none (0<<29) /* no special uses of this page */
-#define PGT_RMA (1<<29) /* This page is an RMA page? */
-#define PGT_writable_page (7<<29) /* has writable mappings of this page? */
-#define PGT_type_mask (7<<29) /* Bits 29-31. */
+#define PGT_none (0UL<<29) /* no special uses of this page */
+#define PGT_RMA (1UL<<29) /* This page is an RMA page? */
+#define PGT_writable_page (7UL<<29) /* has writable mappings of this page? */
+#define PGT_type_mask (7UL<<29) /* Bits 29-31. */
/* Owning guest has pinned this page to its current type? */
#define _PGT_pinned 28
-#define PGT_pinned (1U<<_PGT_pinned)
+#define PGT_pinned (1UL<<_PGT_pinned)
/* Has this page been validated for use as its current type? */
#define _PGT_validated 27
-#define PGT_validated (1U<<_PGT_validated)
+#define PGT_validated (1UL<<_PGT_validated)
/* 16-bit count of uses of this frame as its current type. */
-#define PGT_count_mask ((1U<<16)-1)
+#define PGT_count_mask ((1UL<<16)-1)
/* Cleared when the owning guest 'frees' this page. */
#define _PGC_allocated 31
-#define PGC_allocated (1U<<_PGC_allocated)
+#define PGC_allocated (1UL<<_PGC_allocated)
/* Set on a *guest* page to mark it out-of-sync with its shadow */
#define _PGC_out_of_sync 30
-#define PGC_out_of_sync (1U<<_PGC_out_of_sync)
+#define PGC_out_of_sync (1UL<<_PGC_out_of_sync)
/* Set when is using a page as a page table */
#define _PGC_page_table 29
-#define PGC_page_table (1U<<_PGC_page_table)
+#define PGC_page_table (1UL<<_PGC_page_table)
/* Set when using page for RMA */
#define _PGC_page_RMA 28
-#define PGC_page_RMA (1U<<_PGC_page_RMA)
+#define PGC_page_RMA (1UL<<_PGC_page_RMA)
/* 29-bit count of references to this frame. */
-#define PGC_count_mask ((1U<<28)-1)
+#define PGC_count_mask ((1UL<<28)-1)
#define IS_XEN_HEAP_FRAME(_pfn) (page_to_maddr(_pfn) < xenheap_phys_end)
#define page_get_owner(_p) (unpickle_domptr((_p)->u.inuse._domain))
#define page_set_owner(_p,_d) ((_p)->u.inuse._domain = pickle_domptr(_d))
+#define XENSHARE_writable 0
+#define XENSHARE_readonly 1
+extern void share_xen_page_with_guest(
+ struct page_info *page, struct domain *d, int readonly);
+extern void share_xen_page_with_privileged_guests(
+ struct page_info *page, int readonly);
+
extern struct page_info *frame_table;
extern unsigned long max_page;
extern unsigned long total_pages;
} vm_assist_info_t;
extern vm_assist_info_t vm_assist_info[];
-#define share_xen_page_with_guest(p, d, r) do { } while (0)
-#define share_xen_page_with_privileged_guests(p, r) do { } while (0)
/* hope that accesses to this will fail spectacularly */
-#define machine_to_phys_mapping ((u32 *)-1UL)
+#undef machine_to_phys_mapping
+#define INVALID_M2P_ENTRY (~0UL)
+
+/* do nothing, it's all calculated */
+#define set_gpfn_from_mfn(mfn, pfn) do { } while (0)
+#define get_gpfn_from_mfn(mfn) (mfn)
+
+extern unsigned long mfn_to_gmfn(struct domain *d, unsigned long mfn);
+
+extern unsigned long paddr_to_maddr(unsigned long paddr);
+
+#define INVALID_MFN (~0UL)
+#define PFN_TYPE_NONE 0
+#define PFN_TYPE_RMA 1
+#define PFN_TYPE_LOGICAL 2
+#define PFN_TYPE_IO 3
+#define PFN_TYPE_FOREIGN 4
+#define PFN_TYPE_GNTTAB 5
+
+extern ulong pfn2mfn(struct domain *d, ulong pfn, int *type);
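+/* Only RMA and logical (extent) frames translate to ordinary machine
+ * memory; IO, grant-table and foreign types are rejected here. */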
+static inline unsigned long gmfn_to_mfn(struct domain *d, unsigned long gmfn)
+{
+ int mtype;
+ ulong mfn;
+
+ mfn = pfn2mfn(d, gmfn, &mtype);
+ if (mfn != INVALID_MFN) {
+ switch (mtype) {
+ case PFN_TYPE_RMA:
+ case PFN_TYPE_LOGICAL:
+ break;
+ default:
+ WARN();
+ mfn = INVALID_MFN;
+ break;
+ }
+ }
+ return mfn;
+}
extern int update_grant_va_mapping(unsigned long va,
unsigned long val,